---
global:
  scrape_interval: 15s     # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # By default, evaluate rules every 15 seconds.
  # scrape_timeout is set to the global default (10s).

  external_labels:
    cluster: 'gangshen-cluster'
    monitor: "prometheus"
# Load and evaluate rules in these files every 'evaluation_interval' seconds.
rule_files:
  - 'node.rules.yml'
  - 'blacker.rules.yml'
  - 'bypass.rules.yml'
  - 'pd.rules.yml'
  - 'tidb.rules.yml'
  - 'tikv.rules.yml'
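# For illustration only: the contents of the rule files listed above are not
# shown in this section. A minimal sketch of what one of them (for example
# node.rules.yml, in the Prometheus 2.x YAML rule format) might contain is
# given below; the group name, alert name, expression, and threshold are
# assumptions, not the file's actual contents.
groups:
  - name: example-node-rules
    rules:
      - alert: InstanceDown
        expr: up == 0
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: 'Instance {{ $labels.instance }} is down'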
DEPLOY_DIR={{ deploy_dir }}
cd "${DEPLOY_DIR}" || exit 1
# WARNING: This file was auto-generated. Do not edit!
#          All your edits might be overwritten!
exec > >(tee -i -a "{{ alertmanager_log_dir }}/{{ alertmanager_log_filename }}")
exec 2>&1
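# For illustration only: after redirecting its output as above, a run script of
# this kind typically execs the Alertmanager binary. The paths and flags below
# are a sketch based on common Alertmanager options, not the generated script's
# actual contents; 9093 is Alertmanager's default listen port.
exec bin/alertmanager \
    --config.file="conf/alertmanager.yml" \
    --storage.path="data.alertmanager" \
    --log.level="info" \
    --web.listen-address=":9093"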
global:
  # The smarthost and SMTP sender used for mail notifications.
  smtp_smarthost: 'smtp.qq.com:465'
  smtp_from: 'xxxxx@qq.com'
  smtp_auth_username: 'xxxxx@qq.com'
  smtp_auth_password: 'third-party authorization code'  # the SMTP authorization code issued by the mail provider
  smtp_require_tls: false
  # The Slack webhook URL.
  # slack_api_url: ''
route:
  # A default receiver
  receiver: "db-alert-email"
  # The labels by which incoming alerts are grouped together. For example,
  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
  # be batched into a single group.
  group_by: ['env','instance','alertname','type','group','job']
  # When a new group of alerts is created by an incoming alert, wait at
  # least 'group_wait' before sending the initial notification.
  # This ensures that multiple alerts for the same group that start firing
  # shortly after one another are batched together in the first notification.
  group_wait: 30s
  # Once the first notification has been sent, wait 'group_interval' before
  # sending a batch of new alerts that started firing for that group.
  group_interval: 3m
  # If an alert has already been sent successfully, wait 'repeat_interval'
  # before resending it.
  repeat_interval: 3m
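# The 'db-alert-email' receiver referenced by the route above is not shown in
# this section. As a sketch only, assuming the SMTP settings from the global
# block and a placeholder recipient address, it could be defined like this:
receivers:
  - name: 'db-alert-email'
    email_configs:
      - to: 'xxxxx@qq.com'
        send_resolved: true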